Python web crawling (1)

Web crawling examples in Python using urllib, BeautifulSoup, re, and requests.
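All of the examples below share the same basic shape: fetch a page over HTTP, then pull out the interesting pieces with a parser or a regular expression. A minimal sketch of that pattern is shown here; the URL, header value, and tags are placeholders for illustration, not taken from the examples that follow:

import re
import requests
from bs4 import BeautifulSoup

url = 'http://example.com/'  # placeholder URL; the sections below target real sites
headers = {'User-Agent': 'Mozilla/5.0'}  # many sites reject requests without a browser-like User-Agent
response = requests.get(url, headers=headers, timeout=30)
response.raise_for_status()

# Extract text with BeautifulSoup...
soup = BeautifulSoup(response.text, 'html.parser')
headings = [h.get_text(strip=True) for h in soup.find_all('h1')]

# ...or match the raw HTML directly with a regular expression
links = re.findall(r'href="(.*?)"', response.text)

print(headings, links)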

Crawling hot posts from qiushibaike.com using Python 3.6

# coding: utf-8
import re
import urllib.request
import urllib.error

class QSBK:
    # Initializer: define some state variables
    def __init__(self):
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = {'User-agent': self.user_agent}
        # Holds the jokes; each element is one page's worth of jokes
        self.stories = []
        # Flag that controls whether the crawler keeps running
        self.enable = False

    def getPage(self, pageIndex):  # fetch the HTML content of one page
        try:
            url = 'https://www.qiushibaike.com/hot/' + str(pageIndex)
            req = urllib.request.Request(url, headers=self.headers)
            response = urllib.request.urlopen(req)
            pageCode = response.read().decode('utf-8')
            return pageCode
        except urllib.error.URLError as e:
            if hasattr(e, "reason"):
                print("Failed to connect to qiushibaike.com, reason:", e.reason)
            return None

    def getPageItems(self, pageIndex):
        # Parse the HTML of one page
        pageCode = self.getPage(pageIndex)
        if not pageCode:
            print('Page failed to load...')
            return None
        # Regex capturing the author, joke text, any embedded image, and the vote count
        pattern = re.compile(
            '<div.*?author clearfix">.*?<a.*?<h2.*?>(.*?)</h2>.*?<div.*?content">.*?<span.*?>(.*?)</span>(.*?)'
            '<div class="stats.*?class="number">(.*?)</i>',
            re.S)
        items = re.findall(pattern, pageCode)
        pageStories = []
        # Iterate over items and keep only the jokes without images
        for item in items:
            haveImg = re.search("img", item[2])
            if not haveImg:
                # Replace <br/> tags with newlines
                replaceBR = re.compile('<br/>')
                text = re.sub(replaceBR, "\n", item[1])
                pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[3].strip()])
        return pageStories

    # Load the next page when fewer than two pages of jokes are buffered
    def loadPage(self):
        if self.enable:
            if len(self.stories) < 2:
                pageStories = self.getPageItems(self.pageIndex)
                if pageStories:
                    self.stories.append(pageStories)
                    self.pageIndex += 1

    # Output one joke per key press
    def getOneStory(self, pageStories, page):
        # Iterate over the jokes of this page
        for story in pageStories:
            i = input()
            self.loadPage()
            if i == "Q":
                self.enable = False
                return
            print(u"Page %d\nAuthor: %s\nVotes: %s\n%s" % (page, story[0], story[3], story[1]))

    # Main loop
    def start(self):
        print(u"Reading qiushibaike.com; press Enter for the next joke, or Q to quit")
        self.enable = True
        self.loadPage()
        nowpage = 0
        while self.enable:
            if len(self.stories) > 0:
                pageStories = self.stories[0]
                nowpage += 1
                del self.stories[0]
                self.getOneStory(pageStories, nowpage)

# Entry point
if __name__ == "__main__":
    spider = QSBK()
    spider.start()


Crawling a Baidu Tieba thread (tieba.baidu.com) using Python 2.7

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import urllib2
import re

# Helper class for cleaning HTML tags out of post content
class Tool:
    # Remove <img> tags and runs of seven spaces
    removeImg = re.compile('<img.*?>| {7}')
    # Remove hyperlink tags
    removeAddr = re.compile('<a.*?>|</a>')
    # Replace line-breaking tags with \n
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Replace table cells <td> with \t
    replaceTD = re.compile('<td>')
    # Replace paragraph openings with \n plus indentation
    replacePara = re.compile('<p.*?>')
    # Replace single or double <br> with \n
    replaceBR = re.compile('<br><br>|<br>')
    # Strip any remaining tags
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        # strip() removes leading and trailing whitespace
        return x.strip()

# Baidu Tieba crawler
class BDTB:
    # Initializer: base URL, whether to show only the thread starter, floor-separator flag
    def __init__(self, baseUrl, seeLZ, floorTag):
        # Base URL of the thread
        self.baseURL = baseUrl
        # Only show the thread starter's posts?
        self.seeLZ = '?see_lz=' + str(seeLZ)
        # Tag-stripping helper
        self.tool = Tool()
        # File object used for output
        self.file = None
        # Floor counter, starting at 1
        self.floor = 1
        # Default title, used when the real title cannot be extracted
        self.defaultTitle = u"百度贴吧"
        # Whether to write a separator between floors
        self.floorTag = floorTag

    # Fetch the HTML for the given page of the thread
    def getPage(self, pageNum):
        try:
            # Build the URL
            url = self.baseURL + self.seeLZ + '&pn=' + str(pageNum)
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            # Return the UTF-8 decoded content
            return response.read().decode('utf-8')
        # Report connection failures
        except urllib2.URLError as e:
            if hasattr(e, "reason"):
                print u"Failed to connect to tieba.baidu.com, reason:", e.reason
            return None

    # Extract the thread title
    def getTitle(self, page):
        # Regex that captures the title
        pattern = re.compile('<h3 class="core_title_txt.*?>(.*?)</h3>', re.S)
        result = re.search(pattern, page)
        if result:
            # Return the title if found
            return result.group(1).strip()
        else:
            return None

    # Extract the total number of pages in the thread
    def getPageNum(self, page):
        # Regex that captures the page count
        pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
        result = re.search(pattern, page)
        if result:
            return result.group(1).strip()
        else:
            return None

    # Extract the content of every floor from the page HTML
    def getContent(self, page):
        # Match the content of every floor
        pattern = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
        items = re.findall(pattern, page)
        contents = []
        for item in items:
            # Strip tags and wrap the text in newlines
            content = "\n" + self.tool.replace(item) + "\n"
            contents.append(content.encode('utf-8'))
        return contents

    def setFileTitle(self, title):
        # If the title was extracted, use it as the output file name
        if title is not None:
            self.file = open(title + ".txt", "w+")
        else:
            self.file = open(self.defaultTitle + ".txt", "w+")

    def writeData(self, contents):
        # Write each floor to the file
        for item in contents:
            if self.floorTag == '1':
                # Separator between floors
                floorLine = "\n" + str(self.floor) + u"-----------------------------------------------------------------------------------------\n"
                self.file.write(floorLine)
            self.file.write(item)
            self.floor += 1

    def start(self):
        indexPage = self.getPage(1)
        pageNum = self.getPageNum(indexPage)
        title = self.getTitle(indexPage)
        self.setFileTitle(title)
        if pageNum == None:
            print "The URL is no longer valid, please try again"
            return
        try:
            print "This thread has " + str(pageNum) + " pages"
            for i in range(1, int(pageNum) + 1):
                print "Writing page " + str(i)
                page = self.getPage(i)
                contents = self.getContent(page)
                self.writeData(contents)
        # Handle write errors
        except IOError as e:
            print "Write error, reason: " + e.message
        finally:
            print "Writing finished"

print u"Please enter the thread id"
baseURL = 'http://tieba.baidu.com/p/' + str(raw_input(u'http://tieba.baidu.com/p/'))
seeLZ = raw_input("Only fetch the thread starter's posts? Enter 1 for yes, 0 for no\n")
floorTag = raw_input("Write floor separators? Enter 1 for yes, 0 for no\n")
bdtb = BDTB(baseURL, seeLZ, floorTag)
bdtb.start()


Crawling mzitu.com using Python 3.6

from bs4 import BeautifulSoup
import os
import requests

class mzitu():
    def all_url(self, url):
        html = self.request(url)  # request() returns the response for the album index page
        all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
        for a in all_a:
            title = a.get_text()
            print(u'Saving:', title)  # progress message
            path = str(title).replace("?", '_')  # some titles contain "?", which Windows cannot use in folder names, so replace it
            self.mkdir(path)  # create a folder named after the album title
            href = a['href']
            self.html(href)  # href is the album URL; hand it to html()
    def html(self, href):  # given an album URL, find the URL of every image page
        html = self.request(href)
        max_span = BeautifulSoup(html.text, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()
        for page in range(1, int(max_span) + 1):
            page_url = href + '/' + str(page)
            self.img(page_url)  # handle each image page
    def img(self, page_url):  # given an image page URL, find the actual image URL
        img_html = self.request(page_url)
        img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
        self.save(img_url)
    def save(self, img_url):  # download and save one image
        name = img_url[-9:-4]
        img = self.request(img_url)
        f = open(name + '.jpg', 'ab')
        f.write(img.content)
        f.close()
    def mkdir(self, path):  # create the folder for one album
        path = path.strip()
        isExists = os.path.exists(os.path.join("D:\\mzitu", path))
        if not isExists:
            print(u'Created a folder named', path)
            os.makedirs(os.path.join("D:\\mzitu", path))
            os.chdir(os.path.join("D:\\mzitu", path))  # switch into the new folder
            return True
        else:
            print(u'A folder named', path, u'already exists')
            return False
    def request(self, url):  # GET a URL with a spoofed Referer and User-Agent
        headers = {
            'Referer': url,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
        content = requests.get(url, headers=headers)
        return content

Mzitu = mzitu()  # instantiate the crawler
Mzitu.all_url('http://www.mzitu.com/all')  # entry point: start crawling from the album index page


Crawling now-playing movie ratings from douban.com using Python 3.6

# First snippet: list the movies now playing in Beijing with their ids and ratings
import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}  # browser request headers (most sites reject requests without them)
all_url = 'https://movie.douban.com/nowplaying/beijing/'  # starting URL
start_html = requests.get(all_url, headers=headers)
soup = BeautifulSoup(start_html.text, 'lxml')
nowplaying_movie = soup.find_all('div', id='nowplaying')
nowplaying_movie_list = nowplaying_movie[0].find_all('li', class_='list-item')
nowplaying_list = []
for item in nowplaying_movie_list:
    nowplaying_dict = {}
    nowplaying_dict['id'] = item['data-subject']
    nowplaying_dict['score'] = item['data-score']
    for tag_img_item in item.find_all('img'):
        nowplaying_dict['name'] = tag_img_item['alt']
        nowplaying_list.append(nowplaying_dict)
print(nowplaying_list)
# Second snippet: fetch the short comments for one movie (subject 26363254)
import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}  # browser request headers (most sites reject requests without them)
all_url = 'https://movie.douban.com/subject/26363254/comments'  # starting URL
start_html = requests.get(all_url, headers=headers)
soup = BeautifulSoup(start_html.text, 'lxml')
comment_div_lits = soup.find_all('div', class_='comment')
eachCommentList = []
for item in comment_div_lits:
    if item.find_all('p')[0].string is not None:
        eachCommentList.append(item.find_all('p')[0].string)
print(eachCommentList)


Crawling Taobao search results using Python 3.6

# encoding=utf8
import requests
import re

# Fetch the page text
def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""


# Parse prices and titles out of the JSON embedded in the search results page
def parsePage(ilt, html):
    try:
        plt = re.findall(r'\"view_price\"\:\"[\d.]*\"', html)
        tlt = re.findall(r'\"raw_title\"\:\".*?\"', html)
        for i in range(len(plt)):
            price = eval(plt[i].split(':')[1])
            title = eval(tlt[i].split(':')[1])
            ilt.append([price, title])
    except:
        print("Parsing failed")


# Print the collected goods as a numbered table
def printGoodsList(ilt):
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("No.", "Price", "Item"))
    count = 0
    for g in ilt:
        count = count + 1
        print(tplt.format(count, g[0], g[1]))


def main():
    goods = '羽绒服'  # search keyword (down jackets)
    depth = 3  # number of result pages to crawl
    start_url = 'https://s.taobao.com/search?q=' + goods + '&sort=sale-desc'
    infoList = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except:
            continue
    printGoodsList(infoList)

main()
